Mark Wang
March 21, 2024
import polars as pl
import plotly.express as px
import plotly.io as pio
import matplotlib.pyplot as plt
pio.renderers.default = "notebook"
layout_config = dict(
width=800,
height=500,
template="simple_white",
plot_bgcolor="white",
paper_bgcolor="white",
# xaxis=dict(
# showline=True,
# linewidth=1,
# linecolor="black",
# mirror=True,
# showgrid=True,
# ),
# yaxis=dict(
# showline=True,
# linewidth=1,
# linecolor="black",
# mirror=True,
# showgrid=True,
# ),
)
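The DataFrame df is created in an earlier part of the notebook that is not shown in this section. Purely as a hypothetical placeholder (the file name is mine, not the author's), the loading step might look like:

# Hypothetical placeholder: df is actually loaded earlier in the notebook
df = pl.read_csv("credit_default.csv")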
fig = df.pipe(px.histogram, x="limit_bal", nbins=40, color="y", histnorm="density")
fig.update_layout(
title=dict(text="Distribution of Balance", x=0.5, xanchor="center"), **layout_config
)
fig.update_layout(bargap=0.3, xaxis_tickformat=",d")
fig.show()

How to evaluate the performance of the binning?
Binning is required for the WoE transformation and for the IV and PSI calculations. It has several advantages, such as robustness against outliers.

More or fewer bins?
- More bins: less information lost, less bias
- Fewer bins: more stable

The easiest cut is qcut (quantile-based, equal-frequency binning).
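For reference, the function below implements the usual weight-of-evidence and information-value definitions (notation mine): with $p_{1,i}$ the share of all y=1 observations falling in bin $i$ and $p_{0,i}$ the corresponding share of y=0 observations,

$$
\mathrm{WoE}_i = \ln\frac{p_{1,i}}{p_{0,i}}, \qquad
\mathrm{IV} = \sum_i \left(p_{1,i} - p_{0,i}\right)\mathrm{WoE}_i .
$$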
def calculate_woe(df, x, y):
df_woe = (
df.group_by(x)
.agg(
pl.len().alias("obs"),
pl.col(y).eq(0).sum().alias("y=0"),
pl.col(y).sum().alias("y=1"),
)
.with_columns(
(pl.col("y=0", "y=1") / pl.col("y=0", "y=1").sum()).name.suffix("_rate"),
)
.with_columns(
(pl.col("y=1_rate") / pl.col("y=0_rate")).log().alias("woe"),
)
.with_columns(
(pl.col("y=1_rate") - pl.col("y=0_rate")).mul(pl.col("woe")).alias("iv")
)
        .pipe(lambda frame: frame.sort(frame.columns[0]))  # sort by the binned column
)
    return df_woe

5 Bins
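As a usage sketch (assuming the df_train split and the y column used in the cells below), the 5-bin WoE table for limit_bal can be produced like this:

# Sketch: WoE/IV per bin for 5 equal-frequency bins of limit_bal (in thousands)
df_woe_5 = calculate_woe(
    df_train,
    pl.col("limit_bal").truediv(1_000).qcut(5, allow_duplicates=True),
    "y",
)
print(df_woe_5)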
from plotly import graph_objects as go
from plotly.subplots import make_subplots
subplot_titles = [
"WoE(5 bins)",
"WoE(10 bins)",
"WoE(20 bins)",
"Obs(5 bins)",
"Obs(10 bins)",
"Obs(20 bins)",
]
fig = make_subplots(
rows=2,
cols=3,
subplot_titles=subplot_titles,
horizontal_spacing=0.05,
vertical_spacing=0.15,
shared_yaxes=True,
shared_xaxes=True,
)
for i, n_bins in enumerate([5, 10, 20], start=1):
df_woe_limit_bal = df_train.pipe(
calculate_woe,
pl.col("limit_bal").truediv(1_000).qcut(n_bins, allow_duplicates=True),
"y",
)
fig.add_trace(
go.Scatter(
mode="lines+markers",
x=df_woe_limit_bal["limit_bal"],
y=df_woe_limit_bal["woe"],
name=f"WoE({n_bins} bins)",
),
row=1,
col=i,
)
# Observation count plots in second row
fig.add_trace(
go.Bar(
x=df_woe_limit_bal["limit_bal"],
y=df_woe_limit_bal["obs"],
name=f"Obs({n_bins} bins)",
opacity=0.5,
),
row=2,
col=i,
)
fig.update_layout(
template="simple_white",
height=500,
width=900,
showlegend=False,
title=dict(
text="WoE and Observation Counts with different number of bins",
x=0.5,
xanchor="center",
),
)
fig.update_xaxes(tickangle=90)
fig.show()

Bias-variance trade-off
More bins:
- Less bias
- More variance
- Less monotonic
- Less stable (higher PSI)
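The function below computes the per-bin contribution to the population stability index (notation mine): with $p^{\text{exp}}_i$ and $p^{\text{act}}_i$ the share of observations in bin $i$ for the expected (training) and actual (validation) samples,

$$
\mathrm{PSI} = \sum_i \left(p^{\text{act}}_i - p^{\text{exp}}_i\right)\ln\frac{p^{\text{act}}_i}{p^{\text{exp}}_i} .
$$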
def calculate_psi(expected: pl.Series, actual: pl.Series) -> pl.DataFrame:
expected_dist = expected.rename("x").value_counts().rename({"count": "count_exp"})
actual_dist = actual.rename("x").value_counts().rename({"count": "count_act"})
psi = (
expected_dist.join(actual_dist, on="x", how="full", coalesce=True)
.with_columns(
(pl.col("count_exp") / pl.col("count_exp").sum())
.fill_null(0.0001)
.alias("pct_exp"),
(pl.col("count_act") / pl.col("count_act").sum())
.fill_null(0.0001)
.alias("pct_act"),
)
.with_columns(
psi=(
(pl.col("pct_act") - pl.col("pct_exp"))
* (pl.col("pct_act") / pl.col("pct_exp")).log()
)
)
)
    return psi
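get_qcut_breaks is defined elsewhere in the notebook and is not shown here; a minimal sketch of what it presumably does, namely return the interior quantile breakpoints so the same edges can be reused on the validation set:

def get_qcut_breaks(s: pl.Series, q: int) -> list[float]:
    # Interior quantiles only (skip 0% and 100%); deduplicate repeated edges
    probs = [i / q for i in range(1, q)]
    return sorted({s.quantile(p, interpolation="linear") for p in probs})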
result = []
for q in range(2, 30):
breaks = get_qcut_breaks(df_train["limit_bal"], q=q)
iv = calculate_woe(df_train, pl.col("limit_bal").cut(breaks), "y")
psi = calculate_psi(
df_train["limit_bal"].cut(breaks), df_valid["limit_bal"].cut(breaks)
)
    result.append(dict(q=q, iv=iv["iv"].sum(), psi=psi["psi"].sum()))

| q | iv | psi |
|---|---|---|
| i64 | f64 | f64 |
| 2 | 0.12329 | 0.000028 |
| 3 | 0.138538 | 0.00007 |
| 4 | 0.162616 | 0.000181 |
| 5 | 0.156567 | 0.00018 |
| 6 | 0.164844 | 0.000429 |
| 7 | 0.165888 | 0.00086 |
| 8 | 0.17931 | 0.000432 |
| 9 | 0.182535 | 0.000539 |
| 10 | 0.186456 | 0.000293 |
| 11 | 0.184024 | 0.000561 |
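The table above is presumably the collected result list displayed as a polars DataFrame, e.g.:

# Sketch: turn the list of dicts into a DataFrame for display
df_result = pl.from_dicts(result)
df_result.head(10)

In line with the bias-variance trade-off above, IV generally increases with the number of bins while PSI also tends to grow and fluctuate. The DecisionTreeClassifier inspected next offers a supervised alternative for choosing breakpoints; its min_samples_leaf and monotonic_cst parameters are the ones relevant here (see the fitted estimator further below).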
Init signature:
DecisionTreeClassifier(
*,
criterion='gini',
splitter='best',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
class_weight=None,
ccp_alpha=0.0,
monotonic_cst=None,
)
Docstring:
A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"gini", "entropy", "log_loss"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "log_loss" and "entropy" both for the
Shannon information gain, see :ref:`tree_mathematical_formulation`.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`max(1, int(max_features * n_features_in_))` features are considered at
each split.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
class_weight : dict, list of dict or "balanced", default=None
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonicity constraint to enforce on each feature.
- 1: monotonic increase
- 0: no constraint
- -1: monotonic decrease
If monotonic_cst is None, no constraints are applied.
Monotonicity constraints are not supported for:
- multiclass classifications (i.e. when `n_classes > 2`),
- multioutput classifications (i.e. when `n_outputs_ > 1`),
- classifications trained on data with missing values.
The constraints hold over the probability of the positive class.
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 1.4
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of ndarray
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
max_features_ : int
The inferred value of max_features.
n_classes_ : int or list of int
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
DecisionTreeRegressor : A decision tree regressor.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The :meth:`predict` method operates using the :func:`numpy.argmax`
function on the outputs of :meth:`predict_proba`. This means that in
case the highest predicted probabilities are tied, the classifier will
predict the tied class with the lowest index in :term:`classes_`.
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
File: ~/.pyenv/versions/3.10.13/lib/python3.10/site-packages/sklearn/tree/_classes.py
Type: ABCMeta
Subclasses: ExtraTreeClassifier
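The fitted estimator shown below (criterion='entropy', min_samples_leaf=0.05, monotonic_cst=[-1]) was presumably produced by a cell along these lines; this is a sketch, and the threshold extraction at the end is my addition rather than the author's code:

from sklearn.tree import DecisionTreeClassifier

# Sketch: supervised binning of limit_bal with a shallow, monotone tree
clf = DecisionTreeClassifier(
    criterion="entropy",
    min_samples_leaf=0.05,   # every leaf (bin) keeps at least 5% of the sample
    monotonic_cst=[-1],      # P(y=1) constrained to decrease with limit_bal
)
clf.fit(df_train.select("limit_bal").to_numpy(), df_train["y"].to_numpy())

# Internal nodes carry the split thresholds; leaves are marked with feature -2
tree_breaks = sorted(
    t for t, f in zip(clf.tree_.threshold, clf.tree_.feature) if f != -2
)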
DecisionTreeClassifier(criterion='entropy', min_samples_leaf=0.05,
                       monotonic_cst=[-1])
| limit_bal | sex | education | marriage | age | pay_0 | pay_2 | pay_3 | pay_4 | pay_5 | pay_6 | bill_amt1 | bill_amt2 | bill_amt3 | bill_amt4 | bill_amt5 | bill_amt6 | pay_amt1 | pay_amt2 | pay_amt3 | pay_amt4 | pay_amt5 | pay_amt6 | y |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 |
| 20000 | 2 | 2 | 1 | 24 | 2 | 2 | -1 | -1 | -2 | -2 | 3913 | 3102 | 689 | 0 | 0 | 0 | 0 | 689 | 0 | 0 | 0 | 0 | 1 |
| 120000 | 2 | 2 | 2 | 26 | -1 | 2 | 0 | 0 | 0 | 2 | 2682 | 1725 | 2682 | 3272 | 3455 | 3261 | 0 | 1000 | 1000 | 1000 | 0 | 2000 | 1 |
| 90000 | 2 | 2 | 2 | 34 | 0 | 0 | 0 | 0 | 0 | 0 | 29239 | 14027 | 13559 | 14331 | 14948 | 15549 | 1518 | 1500 | 1000 | 1000 | 1000 | 5000 | 0 |
| 50000 | 2 | 2 | 1 | 37 | 0 | 0 | 0 | 0 | 0 | 0 | 46990 | 48233 | 49291 | 28314 | 28959 | 29547 | 2000 | 2019 | 1200 | 1100 | 1069 | 1000 | 0 |
| 50000 | 1 | 2 | 1 | 57 | -1 | 0 | -1 | 0 | 0 | 0 | 8617 | 5670 | 35835 | 20940 | 19146 | 19131 | 2000 | 36681 | 10000 | 9000 | 689 | 679 | 0 |
| … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … |
| 220000 | 1 | 3 | 1 | 39 | 0 | 0 | 0 | 0 | 0 | 0 | 188948 | 192815 | 208365 | 88004 | 31237 | 15980 | 8500 | 20000 | 5003 | 3047 | 5000 | 1000 | 0 |
| 150000 | 1 | 3 | 2 | 43 | -1 | -1 | -1 | -1 | 0 | 0 | 1683 | 1828 | 3502 | 8979 | 5190 | 0 | 1837 | 3526 | 8998 | 129 | 0 | 0 | 0 |
| 30000 | 1 | 2 | 2 | 37 | 4 | 3 | 2 | -1 | 0 | 0 | 3565 | 3356 | 2758 | 20878 | 20582 | 19357 | 0 | 0 | 22000 | 4200 | 2000 | 3100 | 1 |
| 80000 | 1 | 3 | 1 | 41 | 1 | -1 | 0 | 0 | 0 | -1 | -1645 | 78379 | 76304 | 52774 | 11855 | 48944 | 85900 | 3409 | 1178 | 1926 | 52964 | 1804 | 1 |
| 50000 | 1 | 2 | 1 | 46 | 0 | 0 | 0 | 0 | 0 | 0 | 47929 | 48905 | 49764 | 36535 | 32428 | 15313 | 2078 | 1800 | 1430 | 1000 | 1000 | 1000 | 1 |